From 1676e36c5d20c179effb70a47ed3a61c7e3fb86a Mon Sep 17 00:00:00 2001
From: "kfraser@localhost.localdomain"
Date: Fri, 27 Jul 2007 09:43:07 +0100
Subject: [PATCH] hvm: Clean up CR0 handling.

Upper 32 bits should #GP if set. Lower 32 bits should have reserved
bits silently cleared. Check Intel VMX MSRs to check for compatibility
with our CR0 requirements.

Signed-off-by: Eric E Liu
Signed-off-by: Keir Fraser
---
 xen/arch/x86/hvm/svm/svm.c    | 11 +++++++++++
 xen/arch/x86/hvm/vmx/vmcs.c   | 15 +++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c    | 11 +++++++++++
 xen/include/asm-x86/hvm/hvm.h |  7 +++++++
 4 files changed, 44 insertions(+)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index e9d3d0162f..e7842f3e18 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1668,6 +1668,17 @@ static int svm_set_cr0(unsigned long value)
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
 
+    /* Setting any of the upper 32 bits of CR0 faults with #GP. */
+    if ( (u32)value != value )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1,
+                    "Guest attempts to set upper 32 bits in CR0: %lx",
+                    value);
+        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+        return 0;
+    }
+
+    /* Reserved bits in the lower 32 bits are silently cleared. */
+    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
+
     /* ET is reserved and should be always be 1. */
     value |= X86_CR0_ET;
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 17d3724bc8..d4001b70ba 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -240,9 +240,24 @@ int vmx_cpu_up(void)
 {
     u32 eax, edx;
     int cpu = smp_processor_id();
+    u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;
 
     BUG_ON(!(read_cr4() & X86_CR4_VMXE));
 
+    /*
+     * Ensure the current processor operating mode meets
+     * the required CR0 fixed bits in VMX operation.
+     */
+    cr0 = read_cr0();
+    rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
+    rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
+    if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) )
+    {
+        printk("CPU%d: some settings of host CR0 are "
+               "not allowed in VMX operation.\n", cpu);
+        return 0;
+    }
+
     rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
 
     if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 928c4faff1..9aea14cfd0 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2200,6 +2200,17 @@ static int vmx_set_cr0(unsigned long value)
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
 
+    /* Setting any of the upper 32 bits of CR0 faults with #GP. */
+    if ( (u32)value != value )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1,
+                    "Guest attempts to set upper 32 bits in CR0: %lx",
+                    value);
+        vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+        return 0;
+    }
+
+    /* Reserved bits in the lower 32 bits are silently cleared. */
+    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
+
     /* ET is reserved and should be always be 1. */
     value |= X86_CR0_ET;
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 275a5b52c0..30884f58a3 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -300,6 +300,13 @@ static inline int hvm_event_injection_faulted(struct vcpu *v)
     return hvm_funcs.event_injection_faulted(v);
 }
 
+/* These reserved bits in lower 32 remain 0 after any load of CR0 */
+#define HVM_CR0_GUEST_RESERVED_BITS \
+    ~(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | \
+      X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | \
+      X86_CR0_WP | X86_CR0_AM | X86_CR0_NW | \
+      X86_CR0_CD | X86_CR0_PG)
+
 /* These bits in CR4 are owned by the host. */
 #define HVM_CR4_HOST_MASK (mmu_cr4_features & \
     (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
-- 
2.30.2